
@InProceedings{BenatoTeleFalc:2021:ItPsDe,
               author = "Benato, Barbara Caroline and Telea, Alexandru Cristian and 
                         Falc{\~a}o, Alexandre Xavier",
          affiliation = "{University of Campinas} and {Utrecht University} and 
                         {University of Campinas}",
                title = "Iterative Pseudo-Labeling with Deep Feature Annotation and 
                         Confidence-Based Sampling",
            booktitle = "Proceedings...",
                 year = "2021",
               editor = "Paiva, Afonso and Menotti, David and Baranoski, Gladimir V. G. and 
                         Proen{\c{c}}a, Hugo Pedro and Junior, Antonio Lopes Apolinario 
                         and Papa, Jo{\~a}o Paulo and Pagliosa, Paulo and dos Santos, 
                         Thiago Oliveira and e S{\'a}, Asla Medeiros and da Silveira, 
                         Thiago Lopes Trugillo and Brazil, Emilio Vital and Ponti, Moacir 
                         A. and Fernandes, Leandro A. F. and Avila, Sandra",
         organization = "Conference on Graphics, Patterns and Images, 34. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "semi-supervised learning, pseudo-labels, optimum path forest, data 
                         annotation",
             abstract = "Training deep neural networks is challenging when large and 
                         annotated datasets are unavailable. Extensive manual annotation of 
                         data samples is time-consuming, expensive, and error-prone, 
                         notably when it needs to be done by experts. To address this 
                         issue, increased attention has been devoted to techniques that 
                         propagate uncertain labels (also called pseudo labels) to large 
                         amounts of unsupervised samples and use them for training the 
                         model. However, these techniques still need hundreds of supervised 
                         samples per class in the training set and a validation set with 
                         extra supervised samples to tune the model. We improve a recent 
                         iterative pseudo-labeling technique, Deep Feature Annotation 
                         (DeepFA), by selecting the most confident unsupervised samples to 
                         iteratively train a deep neural network. Our confidence-based 
                         sampling strategy relies on only dozens of annotated training 
                         samples per class with no validation set, considerably reducing 
                         user effort in data annotation. We first ascertain the best 
                         configuration for the baseline, a self-trained deep neural network, 
                         and then evaluate our confidence DeepFA for different confidence 
                         thresholds. Experiments on six datasets show that DeepFA already 
                         outperforms the self-trained baseline, but confidence DeepFA can 
                         considerably outperform the original DeepFA and the baseline.",
  conference-location = "Gramado, RS, Brazil (virtual)",
      conference-year = "18-22 Oct. 2021",
                  doi = "10.1109/SIBGRAPI54419.2021.00034",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/45CUD68",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/45CUD68",
           targetfile = "2021_sibgrapi_Benato-2.pdf",
        urlaccessdate = "06 May 2024"
}
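
The abstract describes a confidence-based pseudo-labeling loop: train a classifier on a few labeled samples per class, pseudo-label the unlabeled pool, keep only the samples whose predicted-label confidence clears a threshold, and retrain on the enlarged set. The Python sketch below illustrates that loop under clearly labeled assumptions: it uses scikit-learn's LogisticRegression on synthetic data as a stand-in classifier, whereas the paper propagates labels with semi-supervised optimum-path forest (OPFSemi) over deep features of a neural network; the threshold, dataset, and every identifier here are illustrative, not the authors' implementation.

# Minimal sketch of confidence-based pseudo-labeling, NOT the DeepFA
# pipeline from the paper (which uses OPFSemi on deep features); a
# logistic-regression stand-in on synthetic data shows the loop itself.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)

# Toy data: dozens of labeled samples, a large unlabeled pool.
X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
labeled = rng.choice(len(X), size=40, replace=False)   # roughly 20 per class
unlabeled = np.setdiff1d(np.arange(len(X)), labeled)

X_lab, y_lab = X[labeled], y[labeled]
threshold = 0.9   # illustrative confidence threshold (a hyperparameter)

for it in range(5):
    clf = LogisticRegression(max_iter=1000).fit(X_lab, y_lab)
    proba = clf.predict_proba(X[unlabeled])
    conf = proba.max(axis=1)          # per-sample confidence
    keep = conf >= threshold          # select only confident pseudo-labels
    pseudo_y = proba.argmax(axis=1)[keep]
    # Next round: true labels plus the confident pseudo-labeled samples.
    X_lab = np.vstack([X[labeled], X[unlabeled][keep]])
    y_lab = np.concatenate([y[labeled], pseudo_y])
    print(f"iteration {it}: kept {keep.sum()} pseudo-labels above {threshold}")

The pseudo-labeled set is re-selected from scratch each iteration rather than accumulated, mirroring the iterative re-labeling idea in the abstract; swapping the stand-in classifier for deep features plus OPFSemi would recover the paper's setting.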

